% scribe: Matias Damian Cattaneo
% lastupdate: 12 November 2005
% lecture: 17
% references: Durrett, Section 2.6
% title: Poisson Processes -- Part I
% keywords: Poisson distribution, Poisson process, counting process, point process, convolution semigroups, Levy, Khinchine, Levy-Khinchine formula, Levy-Khinchine theorem, infinite divisible law, Brownian motion, compound Poisson process
% end
\documentclass[12pt, letterpaper]{article}
\include{macros}
\begin{document}
\lecture{17}{Poisson Processes -- Part I}{Matias Damian Cattaneo}
{cattaneo@econ.berkeley.edu}

\section{The Poisson Distribution}
% keywords: Poisson distribution
% end

Define $S_{n}=X_{1}+X_{2}+\cdots +X_{n}$. In a very simple setup, the $X_{i}$
are independent indicators with%
\begin{eqnarray*}
\P \!\left[ X_{i}=1\right] &=&p \\
\P \!\left[ X_{i}=0\right] &=&1-p.
\end{eqnarray*}
We know that the distribution of $S_{n}$ is $\mbox{Binomial}\left( n,p\right)$,
so we have
\begin{eqnarray*}
\P \!\left[ S_{n}=k\right] &=&\binom{n}{k}p^{k}\left( 1-p\right) ^{n-k}, \\
\E\!\left[ S_{n}\right] &=&np, \mbox{ and} \\
\var\!\left[ S_{n}\right] &=&np\left( 1-p\right).
\end{eqnarray*}
For fixed $p$, we have that%
\begin{equation*}
\frac{S_{n}-np}{\sqrt{np\left( 1-p\right) }}\dcv\mathcal{N}\left( 0,1\right)
\end{equation*}%
as $n\rightarrow \infty $. Now we let $n\rightarrow \infty $ and choose
$p=p_{n}=\lambda /n$ small, so that $np=\lambda$. We see that%
\begin{eqnarray*}
\P \!\left[ S_{n}=0\right] &=&\!\left( 1-p\right) ^{n}=\left( 1-\frac{\lambda }{%
n}\right) ^{n}\longrightarrow e^{-\lambda }, \\
\P \!\left[ S_{n}=1\right] &=&np\left( 1-p\right) ^{n-1}=\lambda \left( 1-%
\frac{\lambda }{n}\right) ^{n-1}\longrightarrow \lambda e^{-\lambda },
\end{eqnarray*}%
and in general%
\begin{eqnarray*}
\P \!\left[ S_{n}=k\right] &=&\binom{n}{k}p^{k}\left( 1-p\right) ^{n-k} \\
&\approx &\frac{n^{k}}{k!}p^{k}\left( 1-p\right) ^{n}\longrightarrow \frac{%
\lambda ^{k}}{k!}e^{-\lambda},
\end{eqnarray*}%
which is the mass function of a Poisson distribution.

\begin{definition}
The \emph{Poisson distribution} with parameter $\lambda $ is given by the mass
function%
\begin{equation*}
P_{\lambda }\!\left( k\right) =\frac{\lambda ^{k}}{k!}e^{-\lambda}, \qquad
k=0,1,2,\ldots
\end{equation*}%
\end{definition}

Observe that, by the Taylor series of $e^{\lambda }$,%
\begin{equation*}
\sum_{k=0}^{\infty }P_{\lambda }\!\left( k\right) =1.
\end{equation*}
Summing up, we see that as $n\rightarrow \infty$, if we let $p\rightarrow 0$
such that $np\rightarrow \lambda \in \left[ 0,\infty \right)$,%
\begin{equation*}
S_{n}\dcv \mbox{Poisson}\!\left( \lambda \right).
\end{equation*}

Observe that we can relax the assumption that the indicators are identically
distributed and extend this result to the triangular array setup. Let the
$X_{n,i}$s be independent indicators with%
\begin{equation*}
\P \!\left[ X_{n,i}=1\right] =p_{n,i}.
\end{equation*}
Note we have $\E\!\left[ S_{n}\right] =\sum_{i=1}^{n}p_{n,i}$, and assuming
that as $n\rightarrow \infty $ we have $\sum_{i=1}^{n}p_{n,i}\longrightarrow
\lambda$ and $\max_{i}p_{n,i}\longrightarrow 0$, we obtain the result:%
\begin{equation*}
S_{n}\dcv \mbox{Poisson}\!\left( \lambda \right).
\end{equation*}
The proof of this result is formalized in \cite{durrett}.

Observe the following facts about the Poisson distribution:

\begin{enumerate}
\item As shown above, it is the limit of properly chosen binomial
distributions.

\item The sum of independent Poisson random variables with parameters
$\lambda$ and $\upsilon$ is again Poisson, with parameter $\lambda +\upsilon$.
This result can be written as follows:%
\begin{equation*}
\mbox{Poisson}\!\left( \lambda \right) \ast \mbox{Poisson}\!\left( \upsilon
\right) =\mbox{Poisson}\!\left( \lambda +\upsilon \right),
\end{equation*}%
where we use the fact that for discrete distributions $P$ and $Q$ on $%
\left\{ 0,1,2,...\right\}$,%
\begin{equation*}
\left( P\ast Q\right) \!\left( n\right) =\sum_{k=0}^{n}P\!\left( k\right)
Q\!\left( n-k\right)
\end{equation*}%
for $n=0,1,2,...$, the convolution formula for the distribution of the sum of
two independent random variables. A short verification follows this list.
\end{enumerate}
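To verify the convolution identity, combine the two Poisson mass functions
with the binomial theorem: for $n=0,1,2,\ldots$,%
\begin{eqnarray*}
\left( P_{\lambda }\ast P_{\upsilon }\right) \!\left( n\right)
&=&\sum_{k=0}^{n}\frac{\lambda ^{k}}{k!}e^{-\lambda }\cdot \frac{\upsilon
^{n-k}}{\left( n-k\right) !}e^{-\upsilon } \\
&=&\frac{e^{-\left( \lambda +\upsilon \right) }}{n!}\sum_{k=0}^{n}\binom{n}{k}%
\lambda ^{k}\upsilon ^{n-k} \\
&=&\frac{\left( \lambda +\upsilon \right) ^{n}}{n!}e^{-\left( \lambda
+\upsilon \right) }=P_{\lambda +\upsilon }\!\left( n\right).
\end{eqnarray*}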
\section{Basics of Poisson Processes}
% keywords: Poisson process, point process
% end

We discuss the basics of Poisson processes here; for details, see
\cite{durrett}. Consider the positive real line divided up into intervals of
length $10^{-6}$. We can think of this as time broken up into very short
intervals. Consider a process $(X_1, X_2, \ldots)$ where $X_i$ is $1$ if a
certain thing happens in the $i$th such time interval and $0$ otherwise.
Suppose further that these $X_i$s are independent and each is $1$ with
probability $\lambda \cdot 10^{-6}$. Writing $n=10^{6}$, each $X_{i}$ is $1$
with probability $p=\frac{\lambda }{n}$, and the waiting time until the first
occurrence of this thing is%
\begin{equation*}
T_{1}=\min \left\{ i:X_{i}=1\right\}.
\end{equation*}%
We see that%
\begin{equation*}
\P \!\left[ T_{1}>m\right] =\left( 1-p\right) ^{m}=\left[ \left( 1-\frac{%
\lambda }{n}\right) ^{n}\right] ^{m/n}\approx e^{-\lambda m/n}
\end{equation*}%
for large $n$, and thus%
\begin{equation*}
\P \!\left[ \frac{T_{1}}{n}>t\right] =\P \!\left[ T_{1}>nt\right]
\longrightarrow e^{-\lambda t}
\end{equation*}%
as $n\rightarrow \infty $. Notice that this limit is the tail of an
exponential distribution. Letting $n\rightarrow \infty $ (with time rescaled
by $1/n$), we obtain a \emph{point process} $(T_i)$, an increasing sequence
of random variables where%
\begin{eqnarray*}
T_{1} &\thicksim &\mbox{Exponential}\!\left( \lambda \right) \\
T_{2}-T_{1} &\thicksim &\mbox{Exponential}\!\left( \lambda \right) \mbox{,
independent of }T_{1} \\
&&\vdots
\end{eqnarray*}%
so we get that $T_{1},\left( T_{2}-T_{1}\right) ,\left( T_{3}-T_{2}\right)
,\ldots$ are iid $\mbox{Exponential}\!\left( \lambda \right)$. Alternatively,
the \emph{counting process} is defined by%
\begin{equation*}
N_{t}=\#\left\{ i:T_{i}\leq t\right\} =\sum_{k=1}^{\infty }\1\left\{
T_{k}\leq t\right\}.
\end{equation*}%
In general $\left\{ N_{t}\right\} _{t\geq 0}$ is called a \emph{Poisson
process} with rate $\lambda$ on $\left( 0,\infty \right)$. It is not
difficult to show that $N_{t}\thicksim \mbox{Poisson}\!\left( \lambda
t\right)$.
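Indeed, write $W_{k}=T_{k}-T_{k-1}$ for the iid $\mbox{Exponential}\!\left(
\lambda \right)$ spacings, so that $T_{k}=W_{1}+\cdots +W_{k}$ has the
$\mbox{Gamma}\!\left( k,\lambda \right)$ density $\lambda ^{k}s^{k-1}%
e^{-\lambda s}/\left( k-1\right) !$ on $\left( 0,\infty \right)$. Since
$\left\{ N_{t}=k\right\} =\left\{ T_{k}\leq t,\ W_{k+1}>t-T_{k}\right\}$ and
$W_{k+1}$ is independent of $T_{k}$, conditioning on $T_{k}=s$ gives, for
$k\geq 1$,%
\begin{equation*}
\P \!\left[ N_{t}=k\right] =\int_{0}^{t}\frac{\lambda ^{k}s^{k-1}}{\left(
k-1\right) !}e^{-\lambda s}e^{-\lambda \left( t-s\right) }ds=\frac{\lambda
^{k}e^{-\lambda t}}{\left( k-1\right) !}\int_{0}^{t}s^{k-1}ds=\frac{\left(
\lambda t\right) ^{k}}{k!}e^{-\lambda t},
\end{equation*}%
and $\P \!\left[ N_{t}=0\right] =\P \!\left[ W_{1}>t\right] =e^{-\lambda t}$,
which is exactly the $\mbox{Poisson}\!\left( \lambda t\right)$ mass function.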
It is important to note that, almost by construction, this process $\left(
N_{t},t\geq 0\right) $ has \emph{stationary independent increments}: that is,
for $0\leq t_{0}<t_{1}<\cdots <t_{m}$ the increments $N_{t_{1}}-N_{t_{0}},
\ldots ,N_{t_{m}}-N_{t_{m-1}}$ are independent, with%
\begin{equation*}
N_{t_{j}}-N_{t_{j-1}}\thicksim \mbox{Poisson}\!\left( \lambda \left(
t_{j}-t_{j-1}\right) \right).
\end{equation*}%
In fact, this property characterizes the process:

\begin{theorem}
A counting process $\left( N_{t},t\geq 0\right)$ has stationary independent
increments with $N_{t}\thicksim \mbox{Poisson}\!\left( \lambda t\right)$ if
and only if its spacings $T_{1},T_{2}-T_{1},T_{3}-T_{2},\ldots$ are iid
$\mbox{Exponential}\!\left( \lambda \right)$.
\end{theorem}

For the ``if'' direction, given iid $W_{k}$ with $\P \!\left[ W_{k}>t\right]
=e^{-\lambda t}$, $t\geq 0$, we construct $(N_t, t\geq 0)$ by the formula:%
\begin{eqnarray*}
T_k &=& W_1 + \cdots + W_k \\
N_t &=& \#\left\{ k: T_k\leq t \right\} \ =\ \sum_{k=1}^{\infty} \1\{T_k\leq
t\}.
\end{eqnarray*}%
Informally, the $\{ T_k \}$ are the points of the $PP(\lambda)$ denoted by
$N$.\footnote{Friendly reference: ``Probability'', J.~Pitman, Springer,
1993.}

The ``only if'' direction is easier. Suppose $\left( N_{t},t\geq 0\right)$ is
a Poisson process with rate $\lambda $. By the Strong Markov Property (see
\cite[Section 5.2]{durrett}), we can deduce that the spacings $T_{k}-T_{k-1}$
are independent, and that each has the same law as $T_{1}$. We can also
deduce that%
\begin{equation*}
\P \!\left[ T_{k}>t\right] =\P \!\left[ N_{t}<k\right],
\end{equation*}%
so in particular $\P \!\left[ T_{1}>t\right] =\P \!\left[ N_{t}=0\right]
=e^{-\lambda t}$, and the spacings are $\mbox{Exponential}\!\left( \lambda
\right)$.

\section{Convolution Semigroups and Infinitely Divisible Laws}
% keywords: convolution semigroups, infinite divisible law, Levy-Khinchine
% theorem, Brownian motion
% end

\begin{definition}
A distribution $F$ is \emph{infinitely divisible} if for every $n$ there is a
distribution $F_{1/n}$ such that $F$ equals the $n$-fold convolution
$F_{1/n}\ast \cdots \ast F_{1/n}$.
\end{definition}

\begin{definition}
A family of distributions $\left( F_{t},t\geq 0\right)$ is a
\emph{convolution semigroup} if%
\begin{equation*}
F_{s}\ast F_{t}=F_{s+t}
\end{equation*}%
for all $s,t\geq 0$. The family is \emph{weakly continuous} if, with
$X_{t}\thicksim F_{t}$,%
\begin{equation*}
\lim_{t\downarrow 0}\P \!\left[ \left\vert X_{t}\right\vert >\varepsilon
\right] =0
\end{equation*}%
for all $\varepsilon >0$.
\end{definition}

By the convolution identity above, $F_{t}=\mbox{Poisson}\!\left( \lambda
t\right)$, $t\geq 0$, is a weakly continuous convolution semigroup.

\begin{theorem}
[L\'evy-Khinchine] There is a 1-1 correspondence between infinitely divisible
distributions $F$ and weakly continuous convolution semigroups $\left(
F_{t},t\geq 0\right)$ with $F_{1}=F$.
\end{theorem}

\begin{corollary}
Every infinitely divisible law is associated with a process with stationary
independent increments which is continuous in probability as $t$ varies.
\end{corollary}

For example, the normal distribution is infinitely divisible, with semigroup
$F_{t}=\mathcal{N}\!\left( 0,t\right)$; the associated process is Brownian
motion. For a proof see \cite{feller1} or \cite{kall}.

Now we present another example. Consider accidents that occur at the times of
a Poisson process with rate $\lambda $. At the time of the $k$th accident let
there be some variable $X_{k}$, like the damage costs covered by insurance
companies, where the $X_{k}$s are iid with distribution $F$ and independent
of $\left( N_{t}\right)$. We have%
\begin{equation*}
Y_{t}=\sum_{k=1}^{N_{t}}X_{k},
\end{equation*}%
where $Y_{t}$ is the total cost/magnitude up to time $t$. The process $\left(
Y_{t},t\geq 0\right)$ is called a \emph{compound Poisson process}, and it is
not difficult to show that it has stationary independent increments. We look
at the distribution of $Y_{t}$: it is hard to describe explicitly, but we can
compute its characteristic function.

\begin{exercise}
Find a formula for the characteristic function of $Y_{t}$ in terms of the
rate $\lambda$ of $N_{t}$ and the distribution $F$ of the $X_{k}$s, that is,%
\begin{equation*}
F\!\left( B\right) =\P \!\left[ X_{k}\in B\right].
\end{equation*}
\end{exercise}

Observe, summing over the possible values of $N_{t}$ and using that the
$X_{k}$s are independent of $N_{t}$, that%
\begin{eqnarray*}
\E\!\left[ \exp \left\{ i\theta Y_{t}\right\} \right]
&=&\sum_{n=0}^{\infty }\E\!\left[ \exp \left\{ i\theta Y_{t}\right\} \cdot
\1\left\{ N_{t}=n\right\} \right] \\
&=&\sum_{n=0}^{\infty }\E\!\left[ \exp \left\{ i\theta \left(
X_{1}+X_{2}+\cdots +X_{n}\right) \right\} \cdot \1\left\{ N_{t}=n\right\}
\right] \\
&=&\sum_{n=0}^{\infty }\E\!\left[ \exp \left\{ i\theta \left(
X_{1}+X_{2}+\cdots +X_{n}\right) \right\} \right] \cdot \E\!\left[ \1\left\{
N_{t}=n\right\} \right] \\
&=&\sum_{n=0}^{\infty }\left( \E\!\left[ \exp \left\{ i\theta X\right\}
\right] \right) ^{n}\cdot \frac{e^{-\lambda t}\left( \lambda t\right) ^{n}}{%
n!} \\
&=&e^{-\lambda t}\exp \left\{ \lambda t\,\E\!\left[ e^{i\theta X}\right]
\right\} \\
&=&\exp \left\{ \lambda t\int_{\R}\left( e^{i\theta x}-1\right) \P \!\left[
X\in dx\right] \right\} \\
&=&\exp \left\{ t\int_{\R}\left( e^{i\theta x}-1\right) L\!\left( dx\right)
\right\},
\end{eqnarray*}%
where $X$ denotes a generic $X_{k}$ and we let $L\!\left( \cdot \right)
=\lambda \P \!\left[ X\in \cdot \right]$, which is called the \emph{L\'evy
measure} associated with the process.
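As a quick sanity check of this formula, take $X_{k}\equiv 1$, so that
$Y_{t}=N_{t}$ and $L=\lambda \delta _{1}$, a point mass at $1$. The formula
gives%
\begin{equation*}
\E\!\left[ e^{i\theta N_{t}}\right] =\exp \left\{ \lambda t\left( e^{i\theta
}-1\right) \right\},
\end{equation*}%
which is indeed the characteristic function of the $\mbox{Poisson}\!\left(
\lambda t\right)$ distribution.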
It is important to note that this is an instance of the
\emph{L\'evy-Khinchine formula}, which gives the characteristic function of
the most general infinitely divisible law. In the next subsection we include
more details on this.

\subsection{Computation of an Instance of the LK Formula}

Given a Poisson point process (P.P.P.) $N$ with intensity measure $\mu$,
write $N\!\left( B\right) =\sum_{i}\1\left\{ X_{i}\in B\right\}$ for some
sequence of random points $X_{i}$; here $N\!\left( B\right) \thicksim
\mbox{Poisson}\!\left( \mu \!\left( B\right) \right)$, and the counts in
disjoint sets are independent. For positive and measurable $f$, the integral%
\begin{equation}
\int f\,dN\ =\ \sum_{i}f\!\left( X_{i}\right)
\end{equation}%
has a clear intuitive meaning and many applications.

\begin{example}
$X_{i}$ could be the arrival time, location, and magnitude of an earthquake:
$$X_i\ =\ (T_i, M_i, Y_i).$$ Then $f\!\left( t,m,y\right)$ represents the
cost to the insurance company incurred by an earthquake at time $t$ with
magnitude $m$ in place $y$.
\end{example}

How do we describe the distribution of $\int f\,dN$? Consider first the case
where $f$ is a simple function, say $f=\sum_{i=1}^{m}x_{i}\1\left\{
B_{i}\right\}$, where the $B_{i}$s are disjoint sets that cover the space.
Then
\begin{equation}
\int f\,dN\ =\ \sum_{i}x_{i}N\!\left( B_{i}\right),
\end{equation}
where the $N\!\left( B_{i}\right)$ are independent random variables with
$\mbox{Poisson}\!\left( \mu \!\left( B_{i}\right) \right)$ distributions.
This sum is again some infinitely divisible distribution. Now we need a
transform; because $f\geq 0$, it is natural to look first at the Laplace
transform. Take $\theta >0$:
\begin{eqnarray}
\E\!\left[e^{-\theta \int f\,dN}\right] &=& \E\!\left[e^{-\theta \sum_i x_i
N(B_i)}\right] \\
&=& \prod_i \E\!\left[e^{-\theta x_i N(B_i)}\right] \label{lap4}\\
&=& \prod_i \exp \!\left[ -\mu(B_i)(1-e^{-\theta x_i}) \right] \label{lap5} \\
&=& \exp \!\left[ -\sum_i \mu(B_i)(1-e^{-\theta x_i}) \right]\\
&=& \exp \!\left[ -\int (1-e^{-\theta f(s)})\mu (ds) \right]
\end{eqnarray}
Here (\ref{lap4}) follows from the independence of the $N\!\left(
B_{i}\right)$s, and (\ref{lap4}) implies (\ref{lap5}) because $N(B_i)\sim
\mbox{Poisson} (\mu (B_i))$ and, if $N\sim \mbox{Poisson} (\lambda)$, then
\begin{eqnarray*}
\E\!\left[ e^{-\theta N}\right] &=& \sum_{n=0}^{\infty} (e^{-\theta})^n
\frac{\lambda ^n}{n !} e^{-\lambda}\\
&=& e^{-\lambda} e^{\lambda e^{-\theta}}\\
&=& \exp \!\left[ -\lambda (1-e^{-\theta}) \right].
\end{eqnarray*}

\begin{theorem}
For every non-negative measurable function $f$, we have, writing
$e^{-\infty}:=0$:
\begin{equation}
\E\!\left[e^{-\theta \int f\,dN}\right] = \exp \!\left[ \int (e^{-\theta
f(s)}-1)\mu (ds) \right].
\end{equation}
This formula is an instance of the L\'evy-Khinchine formula.
\end{theorem}

\begin{proof}
We have shown that the result holds when $f$ is a simple function. In
general, there exists a sequence of simple functions $f_{n}$ such that
$f_{n}\uparrow f$. By the Monotone Convergence Theorem, $\int f_{n}\,dN
\uparrow \int f\,dN$ and $\int (1-e^{-\theta f_{n}})\,d\mu \uparrow \int
(1-e^{-\theta f})\,d\mu$; since $0\leq e^{-\theta \int f_{n}\,dN}\leq 1$, the
Dominated Convergence Theorem gives $\E\!\left[ e^{-\theta \int
f_{n}\,dN}\right] \rightarrow \E\!\left[ e^{-\theta \int f\,dN}\right]$, and
the result follows.
\end{proof}

\bibliographystyle{plain}
\bibliography{../books.bib}

\end{document}